if (!(start_info.flags & SIF_PRIVILEGED))
return -ENXIO;
- if (file->private_data == NULL)
- file->private_data = (void *)(unsigned long)DOMID_IO;
-
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
vma->vm_end-vma->vm_start, vma->vm_page_prot,
- (domid_t)file->private_data))
+ DOMID_IO))
return -EAGAIN;
return 0;
}
-static int ioctl_mem(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case _IO('M', 1): file->private_data = (void *)arg; break;
- default: return -ENOSYS;
- }
- return 0;
-}
#endif /* CONFIG_XEN */
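
The hunk above removes the stateful per-fd target-domain selector from the privileged-guest /dev/mem driver: mmap_mem() now always maps against DOMID_IO instead of whatever domid a prior ioctl stashed in file->private_data. For illustration, the user-space pattern that the removed ioctl supported looked roughly like this (reconstructed from the removed code; error handling omitted, domid and pfn are placeholders):

    int fd = open("/dev/mem", O_RDWR);
    (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);   /* select target domain */
    void *ptr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
                     MAP_SHARED, fd, pfn << PAGE_SHIFT);  /* map one frame */

Foreign-domain mappings now go through the privcmd-backed xc_map_foreign_* interface instead (see the libxc hunks below).
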
/*
write: write_mem,
mmap: mmap_mem,
open: open_mem,
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- ioctl: ioctl_mem,
-#endif
};
static struct file_operations kmem_fops = {
if (!(start_info.flags & SIF_PRIVILEGED))
return -ENXIO;
- if (file->private_data == NULL)
- file->private_data = (void *)(unsigned long)DOMID_IO;
-
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
vma->vm_end-vma->vm_start, vma->vm_page_prot,
- (domid_t)(unsigned long)file->private_data))
+ DOMID_IO))
return -EAGAIN;
return 0;
}
-
-static int ioctl_mem(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case _IO('M', 1): file->private_data = (void *)arg; break;
- default: return -ENOSYS;
- }
- return 0;
-}
#endif /* CONFIG_XEN */
extern long vread(char *buf, char *addr, unsigned long count);
.write = write_mem,
.mmap = mmap_mem,
.open = open_mem,
-#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
- .ioctl = ioctl_mem,
-#endif
};
static struct file_operations kmem_fops = {
unsigned long symtab_len;
};
-static int parseelfimage(char *elfbase,
- unsigned long elfsize,
- struct domain_setup_info *dsi);
-static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
- unsigned long vstart);
-static int loadelfsymtab(char *elfbase, void *pmh, unsigned long *parray,
- struct domain_setup_info *dsi);
+static int
+parseelfimage(
+ char *elfbase, unsigned long elfsize, struct domain_setup_info *dsi);
+static int
+loadelfimage(
+ char *elfbase, int xch, u32 dom, unsigned long *parray,
+ unsigned long vstart);
+static int
+loadelfsymtab(
+ char *elfbase, int xch, u32 dom, unsigned long *parray,
+ struct domain_setup_info *dsi);
static long get_tot_pages(int xc_handle, u32 domid)
{
return (ret < 0) ? -1 : op.u.getmemlist.num_pfns;
}
-static int copy_to_domain_page(void *pm_handle,
+static int copy_to_domain_page(int xc_handle,
+ u32 domid,
unsigned long dst_pfn,
void *src_page)
{
- void *vaddr = map_pfn_writeable(pm_handle, dst_pfn);
+ void *vaddr = xc_map_foreign_range(
+ xc_handle, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
if ( vaddr == NULL )
return -1;
memcpy(vaddr, src_page, PAGE_SIZE);
- unmap_pfn(pm_handle, vaddr);
+ munmap(vaddr, PAGE_SIZE);
return 0;
}
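
With init_pfn_mapper()/map_pfn_writeable() gone, every mapping in this series follows the same three-step idiom that copy_to_domain_page() shows above: map one foreign frame, mutate it, munmap() it. A minimal sketch of the idiom as a standalone helper (hypothetical name, not part of this patch):

    static int with_foreign_page(int xc_handle, u32 dom, unsigned long pfn,
                                 void (*fn)(void *page))
    {
        void *va = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                        PROT_READ|PROT_WRITE, pfn);
        if ( va == NULL )
            return -1;
        fn(va);                   /* caller-supplied mutation */
        munmap(va, PAGE_SIZE);    /* a plain mmap region now */
        return 0;
    }
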
start_info_t *start_info;
shared_info_t *shared_info;
mmu_t *mmu = NULL;
- void *pm_handle=NULL;
int rc;
unsigned long nr_pt_pages;
VMASST_TYPE_writable_pagetables);
if (dsi.load_bsd_symtab)
- loadelfsymtab(image, NULL, NULL, &dsi);
+ loadelfsymtab(image, xc_handle, dom, NULL, &dsi);
if ( (dsi.v_start & (PAGE_SIZE-1)) != 0 )
{
goto error_out;
}
- if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
- goto error_out;
-
if ( (page_array = malloc(nr_pages * sizeof(unsigned long))) == NULL )
{
PERROR("Could not allocate memory");
goto error_out;
}
- loadelfimage(image, pm_handle, page_array, dsi.v_start);
+ loadelfimage(image, xc_handle, dom, page_array, dsi.v_start);
if (dsi.load_bsd_symtab)
- loadelfsymtab(image, pm_handle, page_array, &dsi);
+ loadelfsymtab(image, xc_handle, dom, page_array, &dsi);
/* Load the initial ramdisk image. */
if ( initrd_len != 0 )
PERROR("Error reading initrd image, could not");
goto error_out;
}
- copy_to_domain_page(pm_handle,
+ copy_to_domain_page(xc_handle, dom,
page_array[i>>PAGE_SHIFT], page);
}
}
ctxt->pt_base = l2tab;
/* Initialise the page tables. */
- if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
+ if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ l2tab >> PAGE_SHIFT)) == NULL )
goto error_out;
memset(vl2tab, 0, PAGE_SIZE);
vl2e = &vl2tab[l2_table_offset(dsi.v_start)];
{
l1tab = page_array[ppt_alloc++] << PAGE_SHIFT;
if ( vl1tab != NULL )
- unmap_pfn(pm_handle, vl1tab);
- if ( (vl1tab = map_pfn_writeable(pm_handle,
- l1tab >> PAGE_SHIFT)) == NULL )
+ munmap(vl1tab, PAGE_SIZE);
+ if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ l1tab >> PAGE_SHIFT)) == NULL )
+ {
+ munmap(vl2tab, PAGE_SIZE);
goto error_out;
+ }
memset(vl1tab, 0, PAGE_SIZE);
vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
*vl2e++ = l1tab | L2_PROT;
*vl1e &= ~_PAGE_RW;
vl1e++;
}
- unmap_pfn(pm_handle, vl1tab);
- unmap_pfn(pm_handle, vl2tab);
+ munmap(vl1tab, PAGE_SIZE);
+ munmap(vl2tab, PAGE_SIZE);
/* Write the phys->machine and machine->phys table entries. */
physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
- physmap = physmap_e =
- map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
+ physmap = physmap_e = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
+ page_array[physmap_pfn++]);
for ( count = 0; count < nr_pages; count++ )
{
if ( add_mmu_update(xc_handle, mmu,
(page_array[count] << PAGE_SHIFT) |
MMU_MACHPHYS_UPDATE, count) )
+ {
+ munmap(physmap, PAGE_SIZE);
goto error_out;
+ }
*physmap_e++ = page_array[count];
if ( ((unsigned long)physmap_e & (PAGE_SIZE-1)) == 0 )
{
- unmap_pfn(pm_handle, physmap);
- physmap = physmap_e =
- map_pfn_writeable(pm_handle, page_array[physmap_pfn++]);
+ munmap(physmap, PAGE_SIZE);
+ physmap = physmap_e = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
+ page_array[physmap_pfn++]);
}
}
- unmap_pfn(pm_handle, physmap);
+ munmap(physmap, PAGE_SIZE);
/*
* Pin down l2tab addr as page dir page - causes hypervisor to provide
l2tab | MMU_EXTENDED_COMMAND, MMUEXT_PIN_L2_TABLE) )
goto error_out;
- start_info = map_pfn_writeable(
- pm_handle, page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
+ start_info = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
+ page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
memset(start_info, 0, sizeof(*start_info));
start_info->nr_pages = nr_pages;
start_info->shared_info = shared_info_frame << PAGE_SHIFT;
}
strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
start_info->cmd_line[MAX_CMDLINE-1] = '\0';
- unmap_pfn(pm_handle, start_info);
+ munmap(start_info, PAGE_SIZE);
/* shared_info page starts its life empty. */
- shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
+ shared_info = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
memset(shared_info, 0, sizeof(shared_info_t));
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- unmap_pfn(pm_handle, shared_info);
+ munmap(shared_info, PAGE_SIZE);
/* Send the page update requests down to the hypervisor. */
if ( finish_mmu_updates(xc_handle, mmu) )
goto error_out;
free(mmu);
- (void)close_pfn_mapper(pm_handle);
free(page_array);
*pvsi = vstartinfo_start;
error_out:
if ( mmu != NULL )
free(mmu);
- if ( pm_handle != NULL )
- (void)close_pfn_mapper(pm_handle);
if ( page_array != NULL )
free(page_array);
return -1;
return 0;
}
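
One consequence of dropping the mapper_desc_t bookkeeping (removed from xc_private.c below) is visible throughout the hunks above: nothing tracks live mappings any more, so error paths taken while vl1tab, vl2tab or physmap are mapped must munmap() them by hand before jumping to error_out. The pattern, distilled from the added cleanup blocks:

    if ( something_failed )                 /* e.g. add_mmu_update() */
    {
        munmap(vl2tab, PAGE_SIZE);          /* undo live mappings by hand, */
        goto error_out;                     /* then take the common cleanup */
    }
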
-static int loadelfimage(char *elfbase, void *pmh, unsigned long *parray,
- unsigned long vstart)
+static int
+loadelfimage(
+ char *elfbase, int xch, u32 dom, unsigned long *parray,
+ unsigned long vstart)
{
Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase;
Elf_Phdr *phdr;
for ( done = 0; done < phdr->p_filesz; done += chunksz )
{
pa = (phdr->p_vaddr + done) - vstart;
- va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
+ va = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
chunksz = phdr->p_filesz - done;
if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memcpy(va + (pa & (PAGE_SIZE-1)),
elfbase + phdr->p_offset + done, chunksz);
- unmap_pfn(pmh, va);
+ munmap(va, PAGE_SIZE);
}
for ( ; done < phdr->p_memsz; done += chunksz )
{
pa = (phdr->p_vaddr + done) - vstart;
- va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
+ va = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
chunksz = phdr->p_memsz - done;
if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memset(va + (pa & (PAGE_SIZE-1)), 0, chunksz);
- unmap_pfn(pmh, va);
+ munmap(va, PAGE_SIZE);
}
}
return 0;
}
-static void map_memcpy(unsigned long dst, char *src, unsigned long size,
- void *pmh, unsigned long *parray, unsigned long vstart)
+static void
+map_memcpy(
+ unsigned long dst, char *src, unsigned long size,
+ int xch, u32 dom, unsigned long *parray, unsigned long vstart)
{
char *va;
unsigned long chunksz, done, pa;
for ( done = 0; done < size; done += chunksz )
{
pa = dst + done - vstart;
- va = map_pfn_writeable(pmh, parray[pa>>PAGE_SHIFT]);
+ va = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE, parray[pa>>PAGE_SHIFT]);
chunksz = size - done;
if ( chunksz > (PAGE_SIZE - (pa & (PAGE_SIZE-1))) )
chunksz = PAGE_SIZE - (pa & (PAGE_SIZE-1));
memcpy(va + (pa & (PAGE_SIZE-1)), src + done, chunksz);
- unmap_pfn(pmh, va);
+ munmap(va, PAGE_SIZE);
}
}
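
loadelfimage() and map_memcpy() both advance through the destination one machine frame at a time, capping each chunk so it never crosses a page boundary. The per-iteration arithmetic, factored out for clarity (a hypothetical helper, not in the patch):

    /* Bytes copyable at guest-physical offset pa without leaving the
     * current page: the lesser of what remains and the room left in
     * this page. */
    static unsigned long chunk_size(unsigned long pa, unsigned long remaining)
    {
        unsigned long room = PAGE_SIZE - (pa & (PAGE_SIZE-1));
        return (remaining < room) ? remaining : room;
    }
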
#define ELFROUND (ELFSIZE / 8)
-static int loadelfsymtab(char *elfbase, void *pmh, unsigned long *parray,
- struct domain_setup_info *dsi)
+static int
+loadelfsymtab(
+ char *elfbase, int xch, u32 dom, unsigned long *parray,
+ struct domain_setup_info *dsi)
{
Elf_Ehdr *ehdr = (Elf_Ehdr *)elfbase, *sym_ehdr;
Elf_Shdr *shdr;
if ( (shdr[h].sh_type == SHT_STRTAB) ||
(shdr[h].sh_type == SHT_SYMTAB) )
{
- if ( pmh != NULL )
+ if ( parray != NULL )
map_memcpy(maxva, elfbase + shdr[h].sh_offset, shdr[h].sh_size,
- pmh, parray, dsi->v_start);
+ xch, dom, parray, dsi->v_start);
/* Mangled to be based on ELF header location. */
shdr[h].sh_offset = maxva - dsi->symtab_addr;
goto out;
}
- if ( pmh != NULL ) {
+ if ( parray != NULL )
+ {
*(int *)p = maxva - dsi->symtab_addr;
sym_ehdr = (Elf_Ehdr *)(p + sizeof(int));
memcpy(sym_ehdr, ehdr, sizeof(Elf_Ehdr));
/* Copy total length, crafted ELF header and section header table */
map_memcpy(symva, p, sizeof(int) + sizeof(Elf_Ehdr) +
- ehdr->e_shnum * sizeof(Elf_Shdr), pmh, parray,
+ ehdr->e_shnum * sizeof(Elf_Shdr), xch, dom, parray,
dsi->v_start);
}
mmu_t *mmu = NULL;
- void *pm_handle = NULL;
-
/* used by debug verify code */
unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
}
shared_info_frame = op.u.getdomaininfo.shared_info_frame;
- if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
- goto out;
-
/* Build the pfn-to-mfn table. We choose MFN ordering returned by Xen. */
if ( get_pfn_list(xc_handle, dom, pfn_to_mfn_table, nr_pfns) != nr_pfns )
{
{
unsigned int count, *pfntab;
int rc;
+
if ( xcio_read(ioctxt, &count, sizeof(count)) )
{
xcio_error(ioctxt, "Error when reading from state file");
}
pfntab = malloc( sizeof(unsigned int) * count );
- if ( !pfntab )
+ if ( pfntab == NULL )
{
xcio_error(ioctxt, "Out of memory");
goto out;
goto out;
}
- for(i=0;i<count;i++)
+ for ( i = 0; i < count; i++ )
{
unsigned long pfn = pfntab[i];
-        pfntab[i]=pfn_to_mfn_table[pfn];
-        pfn_to_mfn_table[pfn] = 0x80000001; // not in pmap
+        pfntab[i] = pfn_to_mfn_table[pfn];
+        pfn_to_mfn_table[pfn] = 0x80000001; /* not in pmap */
}
- if ( count>0 )
+ if ( count > 0 )
{
if ( (rc = do_dom_mem_op( xc_handle,
MEMOP_decrease_reservation,
{
printf("Decreased reservation by %d pages\n", count);
}
- }
-
+ }
}
-
-
if ( xcio_read(ioctxt, &ctxt, sizeof(ctxt)) ||
xcio_read(ioctxt, shared_info, PAGE_SIZE) )
{
goto out;
}
ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn];
- p_srec = map_pfn_writeable(pm_handle, mfn);
+ p_srec = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn);
p_srec->resume_info.nr_pages = nr_pfns;
p_srec->resume_info.shared_info = shared_info_frame << PAGE_SHIFT;
p_srec->resume_info.flags = 0;
- unmap_pfn(pm_handle, p_srec);
+ munmap(p_srec, PAGE_SIZE);
/* Uncanonicalise each GDT frame number. */
if ( ctxt.gdt_ents > 8192 )
}
ctxt.pt_base = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
-
/* clear any pending events and the selector */
- memset( &(((shared_info_t *)shared_info)->evtchn_pending[0]),
- 0, sizeof (((shared_info_t *)shared_info)->evtchn_pending)+
- sizeof(((shared_info_t *)shared_info)->evtchn_pending_sel) );
+ memset(&(((shared_info_t *)shared_info)->evtchn_pending[0]),
+ 0, sizeof (((shared_info_t *)shared_info)->evtchn_pending)+
+ sizeof(((shared_info_t *)shared_info)->evtchn_pending_sel));
/* Copy saved contents of shared-info page. No checking needed. */
- ppage = map_pfn_writeable(pm_handle, shared_info_frame);
+ ppage = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
memcpy(ppage, shared_info, sizeof(shared_info_t));
- unmap_pfn(pm_handle, ppage);
-
+ munmap(ppage, PAGE_SIZE);
/* Uncanonicalise the pfn-to-mfn table frame-number list. */
for ( i = 0; i < (nr_pfns+1023)/1024; i++ )
xc_domain_destroy(xc_handle, dom);
if ( mmu != NULL )
free(mmu);
- if ( pm_handle != NULL )
- (void)close_pfn_mapper(pm_handle);
if ( pfn_to_mfn_table != NULL )
free(pfn_to_mfn_table);
if ( pfn_type != NULL )
#define DPRINTF(x)
#endif
-static int loadelfimage(gzFile, void *, unsigned long *, unsigned long,
+static int loadelfimage(gzFile, int, u32, unsigned long *, unsigned long,
unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
shared_info_t *shared_info;
unsigned long ksize;
mmu_t *mmu = NULL;
- void *pm_handle = NULL;
int i;
- if ( (pm_handle = init_pfn_mapper((domid_t)dom)) == NULL )
- goto error_out;
-
if ( (page_array = malloc(tot_pages * sizeof(unsigned long))) == NULL )
{
PERROR("Could not allocate memory");
goto error_out;
}
- if (loadelfimage(kernel_gfd, pm_handle, page_array, tot_pages,
+ if (loadelfimage(kernel_gfd, xc_handle, dom, page_array, tot_pages,
virt_load_addr, &ksize, &symtab_addr, &symtab_len))
goto error_out;
goto error_out;
/* Initialise the page tables. */
- if ( (vl2tab = map_pfn_writeable(pm_handle, l2tab >> PAGE_SHIFT)) == NULL )
+ if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ l2tab >> PAGE_SHIFT)) == NULL )
goto error_out;
memset(vl2tab, 0, PAGE_SIZE);
vl2e = &vl2tab[l2_table_offset(*virt_load_addr)];
{
l1tab = page_array[alloc_index--] << PAGE_SHIFT;
if ( vl1tab != NULL )
- unmap_pfn(pm_handle, vl1tab);
- if ( (vl1tab = map_pfn_writeable(pm_handle,
- l1tab >> PAGE_SHIFT)) == NULL )
+ munmap(vl1tab, PAGE_SIZE);
+ if ( (vl1tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+ PROT_READ|PROT_WRITE,
+ l1tab >> PAGE_SHIFT)) == NULL )
+ {
+ munmap(vl2tab, PAGE_SIZE);
goto error_out;
+ }
memset(vl1tab, 0, PAGE_SIZE);
vl1e = &vl1tab[l1_table_offset(*virt_load_addr +
(count<<PAGE_SHIFT))];
if ( add_mmu_update(xc_handle, mmu,
(page_array[count] << PAGE_SHIFT) |
MMU_MACHPHYS_UPDATE, count) )
+ {
+ munmap(vl1tab, PAGE_SIZE);
+ munmap(vl2tab, PAGE_SIZE);
goto error_out;
+ }
}
- unmap_pfn(pm_handle, vl1tab);
- unmap_pfn(pm_handle, vl2tab);
+ munmap(vl1tab, PAGE_SIZE);
+ munmap(vl2tab, PAGE_SIZE);
/*
* Pin down l2tab addr as page dir page - causes hypervisor to provide
*virt_startinfo_addr =
*virt_load_addr + ((alloc_index-1) << PAGE_SHIFT);
- start_info = map_pfn_writeable(pm_handle, page_array[alloc_index-1]);
+ start_info = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_WRITE, page_array[alloc_index-1]);
memset(start_info, 0, sizeof(*start_info));
start_info->pt_base = *virt_load_addr + ((tot_pages-1) << PAGE_SHIFT);
start_info->mod_start = symtab_addr;
start_info->domain_controller_evtchn = control_evtchn;
strncpy(start_info->cmd_line, cmdline, MAX_CMDLINE);
start_info->cmd_line[MAX_CMDLINE-1] = '\0';
- unmap_pfn(pm_handle, start_info);
+ munmap(start_info, PAGE_SIZE);
/* shared_info page starts its life empty. */
- shared_info = map_pfn_writeable(pm_handle, shared_info_frame);
+ shared_info = xc_map_foreign_range(
+ xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
memset(shared_info, 0, PAGE_SIZE);
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- unmap_pfn(pm_handle, shared_info);
+ munmap(shared_info, PAGE_SIZE);
/* Send the page update requests down to the hypervisor. */
if ( finish_mmu_updates(xc_handle, mmu) )
goto error_out;
free(mmu);
- (void)close_pfn_mapper(pm_handle);
free(page_array);
return 0;
error_out:
if ( mmu != NULL )
free(mmu);
- if ( pm_handle != NULL )
- (void)close_pfn_mapper(pm_handle);
-    if ( page_array == NULL )
+    if ( page_array != NULL )
free(page_array);
return -1;
#define IS_BSS(p) (p.p_filesz < p.p_memsz)
static int
-loadelfimage(gzFile kernel_gfd, void *pm_handle, unsigned long *page_array,
+loadelfimage(gzFile kernel_gfd, int xch, u32 dom, unsigned long *page_array,
unsigned long tot_pages, unsigned long *virt_load_addr,
unsigned long *ksize, unsigned long *symtab_addr,
unsigned long *symtab_len)
goto out;
}
curpos += c;
- vaddr = map_pfn_writeable(pm_handle,
- page_array[(iva - *virt_load_addr)
- >> PAGE_SHIFT]);
+ vaddr = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE,
+ page_array[(iva - *virt_load_addr) >> PAGE_SHIFT]);
if ( vaddr == NULL )
{
ERROR("Couldn't map guest memory");
DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)iva,
vaddr + (iva & (PAGE_SIZE - 1)), c));
memcpy(vaddr + (iva & (PAGE_SIZE - 1)), page, c);
- unmap_pfn(pm_handle, vaddr);
+ munmap(vaddr, PAGE_SIZE);
}
if ( phdr[h].p_vaddr + phdr[h].p_filesz > maxva )
}
curpos += c;
- vaddr = map_pfn_writeable(pm_handle,
- page_array[(maxva - *virt_load_addr)
- >> PAGE_SHIFT]);
+ vaddr = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE,
+ page_array[(maxva - *virt_load_addr) >> PAGE_SHIFT]);
if ( vaddr == NULL )
{
ERROR("Couldn't map guest memory");
DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)maxva,
vaddr + (maxva & (PAGE_SIZE - 1)), c));
memcpy(vaddr + (maxva & (PAGE_SIZE - 1)), page, c);
- unmap_pfn(pm_handle, vaddr);
+ munmap(vaddr, PAGE_SIZE);
}
*symtab_len += shdr[h].sh_size;
c = PAGE_SIZE - (symva & (PAGE_SIZE - 1));
if ( c > s - i )
c = s - i;
- vaddr = map_pfn_writeable(pm_handle,
- page_array[(symva - *virt_load_addr)
- >> PAGE_SHIFT]);
+ vaddr = xc_map_foreign_range(
+ xch, dom, PAGE_SIZE, PROT_WRITE,
+ page_array[(symva - *virt_load_addr) >> PAGE_SHIFT]);
if ( vaddr == NULL )
{
ERROR("Couldn't map guest memory");
}
DPRINTF(("copy page %p to %p, count 0x%x\n", (void *)symva,
vaddr + (symva & (PAGE_SIZE - 1)), c));
- memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i,
- c);
- unmap_pfn(pm_handle, vaddr);
+ memcpy(vaddr + (symva & (PAGE_SIZE - 1)), p + i, c);
+ munmap(vaddr, PAGE_SIZE);
}
*symtab_len = maxva - *symtab_addr;
#include "xc_private.h"
-#define MAX_EXTENTS 8
-typedef struct {
- int fd;
- struct {
- void *base;
- unsigned long length;
- } extent[MAX_EXTENTS];
-} mapper_desc_t;
-
-void *init_pfn_mapper(domid_t domid)
-{
- int fd = open("/dev/mem", O_RDWR);
- mapper_desc_t *desc;
-
- if ( fd < 0 )
- return NULL;
-
- if ( (desc = malloc(sizeof(*desc))) == NULL )
- {
- close(fd);
- return NULL;
- }
-
- (void)ioctl(fd, _IO('M', 1), (unsigned long)domid);
-
- memset(desc, 0, sizeof(*desc));
- desc->fd = fd;
-
- return desc;
-}
-
-int close_pfn_mapper(void *pm_handle)
-{
- mapper_desc_t *desc = pm_handle;
- int i;
-
- for ( i = 0; i < MAX_EXTENTS; i++ )
- {
- if ( desc->extent[i].base != NULL )
- (void)munmap(desc->extent[i].base, desc->extent[i].length);
- }
-
- close(desc->fd);
- free(desc);
-
- return 0;
-}
-
-static int get_free_offset(mapper_desc_t *desc)
-{
- int i;
-
- for ( i = 0; i < MAX_EXTENTS; i++ )
- {
- if ( desc->extent[i].base == NULL )
- break;
- }
-
- if ( i == MAX_EXTENTS )
- {
- fprintf(stderr, "Extent overflow in map_pfn_*()!\n");
- fflush(stderr);
- *(int*)0=0; /* XXX */
- }
-
- return i;
-}
-
-void *map_pfn_writeable(void *pm_handle, unsigned long pfn)
-{
- mapper_desc_t *desc = pm_handle;
- void *vaddr;
- int off;
-
- vaddr = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE,
- MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
- if ( vaddr == MAP_FAILED )
- return NULL;
-
- off = get_free_offset(desc);
- desc->extent[off].base = vaddr;
- desc->extent[off].length = PAGE_SIZE;
-
- return vaddr;
-}
-
-void *map_pfn_readonly(void *pm_handle, unsigned long pfn)
-{
- mapper_desc_t *desc = pm_handle;
- void *vaddr;
- int off;
-
- vaddr = mmap(NULL, PAGE_SIZE, PROT_READ,
- MAP_SHARED, desc->fd, pfn << PAGE_SHIFT);
- if ( vaddr == MAP_FAILED )
- return NULL;
-
- off = get_free_offset(desc);
- desc->extent[off].base = vaddr;
- desc->extent[off].length = PAGE_SIZE;
-
- return vaddr;
-}
-
-void unmap_pfn(void *pm_handle, void *vaddr)
-{
- mapper_desc_t *desc = pm_handle;
- int i;
- unsigned long len = 0;
-
- for ( i = 0; i < MAX_EXTENTS; i++ )
- {
- if ( desc->extent[i].base == vaddr )
- {
- desc->extent[i].base = NULL;
- len = desc->extent[i].length;
- }
- }
-
- if ( len == 0 )
- *(int*)0 = 0; /* XXX */
-
- (void)munmap(vaddr, len);
-}
-
-/*******************/
-
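
The entire /dev/mem-backed mapper above goes away in favour of the privcmd-backed xc_map_foreign_* calls. As a rough model of what the single-range call provides, it can be expressed in terms of the batch call that survives below. This is a sketch only, assuming xc_map_foreign_batch() maps arr[i] at base + i*PAGE_SIZE and that size is page-aligned; the real libxc implementation talks to privcmd directly:

    void *map_foreign_range_sketch(int xc_handle, u32 dom, int size,
                                   int prot, unsigned long mfn)
    {
        int i, num = size / PAGE_SIZE;
        unsigned long *arr = malloc(num * sizeof(*arr));   /* <stdlib.h> */
        void *ret;
        if ( arr == NULL )
            return NULL;
        for ( i = 0; i < num; i++ )
            arr[i] = mfn + i;            /* contiguous machine frames */
        ret = xc_map_foreign_batch(xc_handle, dom, prot, arr, num);
        free(arr);
        return ret;
    }
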
void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot,
unsigned long *arr, int num )
{
/*
* PFN mapping.
*/
-void *init_pfn_mapper(domid_t domid);
-int close_pfn_mapper(void *pm_handle);
-void *map_pfn_writeable(void *pm_handle, unsigned long pfn);
-void *map_pfn_readonly(void *pm_handle, unsigned long pfn);
-void unmap_pfn(void *pm_handle, void *vaddr);
int get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr);
unsigned long csum_page (void * page);
* @num: number of trace buffers to map
* @size: size of each trace buffer
*
- * Maps the Xen trace buffers them into process address space by memory mapping
- * /dev/mem. Returns the location the buffers have been mapped to.
+ * Maps the Xen trace buffers into the process address space.
*/
struct t_buf *map_tbufs(unsigned long tbufs_mach, unsigned int num,
unsigned long size)